/* (Stray web-archive navigation text removed; the source file begins below.) */
/*
 * VideoWrapper.cpp
 *
 * Copyright (C) Alberto Vigata - January 2000  ultraflask@yahoo.com
 *
 * This file is part of FlasKMPEG, a free MPEG to MPEG/AVI converter
 *
 * FlasKMPEG is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * FlasKMPEG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU Make; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "VideoWrapper.h"
#include "..\Subpic\Subpic.h"

extern "C" {
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <fcntl.h>
#include <assert.h>
#define GLOBAL
#include "config.h"
#include "global.h"
}

/* FIX: argument and result fully parenthesized.  The previous definition,
 * ((x>0)? x:-x), mis-expanded for compound arguments (e.g. ABS(a-b)) and
 * when the expansion was embedded in a larger expression. */
#define ABS(x) (((x) > 0) ? (x) : -(x))

//Global variables for syncing with decoding thread
VideoWrapper *myVideo;          // back-pointer used by the C-style helpers below
__int64      rawPTS[1024];      // raw PTS ring, reset by ResetRawPTS()
int          gopPOS;
bool         closedGOP;
bool         synced = false;    // true once the stream is positioned at 0
int          gopNum;

/* private prototypes */
static int  video_sequence _ANSI_ARGS_((int *framenum));
static int  Decode_Bitstream _ANSI_ARGS_((void));
static int  Headers _ANSI_ARGS_((void));
static void Initialize_Sequence _ANSI_ARGS_((void));
static void Initialize_Decoder _ANSI_ARGS_((void));
void        Deinitialize_Sequence _ANSI_ARGS_((void));
static void Process_Options _ANSI_ARGS_((int argc, char *argv[]));
static void InitClip();
static void DeInitClip();

/* Skip forward in the bitstream until the next MPEG sequence_header_code
 * (0x000001B3) is aligned at the read position. */
void NextSequenceHeader()
{
  while (Show_Bits(32) != 0x000001B3)
    Flush_Buffer(1);
}

#define RESERVED 1

/* MPEG-1 pel_aspect_ratio table (ISO/IEC 11172-2): sample aspect ratios. */
static double MPEG1aspect_ratio_Table[16] = {
  0.0,    1.0000, 0.6735, 0.7031,
  0.7615, 0.8055, 0.8437, 0.8935,
  0.9375, 0.9815, 1.0255, 1.0695,
  1.1250, 1.1575, 1.2015, RESERVED
};

char *MPEG1aspectStrings[16] = {
  "Unexpected",       "1.0 Square Sample", "SAR 0.6735", "16:9 PAL",
  "SAR 0.7615",       "SAR 0.8055",        "16:9 NTSC",  "SAR 0.8935",
  "4:3 CCIR601 PAL",  "SAR 0.9815",        "1.0255",     "1.0695",
  "4:3 CCIR601 NTSC", "",                  "",           ""
};

/* frame_rate_code -> frames per second (ISO/IEC 13818-2 Table 6-4). */
static double frameRateTable[16] = {
  0.0, (24000.0/1001.0), 24.0, 25.0,
  (30000.0/1001.0), 30.0, 50.0, (60000.0/1001.0),
  60.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};

/* MPEG-2 aspect_ratio_information -> display aspect ratio (height/width factor). */
static double MPEG2aspect_ratio_Table[16] = {
  0.0, 1, (3.0/4.0), (9.0/16.0), (1.0/2.21),
  RESERVED, RESERVED, RESERVED, RESERVED, RESERVED,
  RESERVED, RESERVED, RESERVED, RESERVED, RESERVED, RESERVED
};

char *MPEGframeRateStrings[16] = {
  "Unexpected",        "FILM at 23.976 fps", "FILM at 24 fps",    "PAL at 25 fps",
  "NTSC at 29.97 fps", "NTSC at 30 fps",     "PAL at 50 fps",     "NTSC at 59.94 fps",
  "NTSC at 60 fps",    "", "", "", "", "", "", ""
};

char *MPEG2aspectStrings[16] = {
  "Unexpected", "1.0 Square Sample", "4:3", "16:9",
  "2.21:1", "", "", "",
  "", "", "", "",
  "", "", "", ""
};

/* Allocate the Y/U/V planes of a reconstruction image for a 4:2:0 picture of
 * xsize x ysize luma samples and mark the slot FREE.
 * Returns 1 on success, 0 if image is NULL or any allocation fails.
 * FIX: malloc results are now checked; on failure everything allocated so far
 * is released so deinit_recons() stays safe to call. */
int init_recons(TRecImage *image, int xsize, int ysize)
{
  if (image) {
    image->im.Y = (YUVPixel *)malloc(xsize*ysize);
    image->im.U = (YUVPixel *)malloc((xsize*ysize)>>2);   /* chroma plane: 1/4 of luma */
    image->im.V = (YUVPixel *)malloc((xsize*ysize)>>2);
    if (!image->im.Y || !image->im.U || !image->im.V) {
      free(image->im.Y); image->im.Y = NULL;
      free(image->im.U); image->im.U = NULL;
      free(image->im.V); image->im.V = NULL;
      return 0;
    }
    image->im.Yxsize = xsize;
    image->im.Yysize = ysize;
    image->im.Cxsize = xsize>>1;
    image->im.Cysize = ysize>>1;
    image->state = FREE;
    return 1;
  }
  else
    return 0;
}

/* Release the planes allocated by init_recons(); pointers are NULLed so a
 * double call is harmless. */
void deinit_recons(TRecImage *image)
{
  if (image) {
    free(image->im.Y); image->im.Y = NULL;
    free(image->im.U); image->im.U = NULL;
    free(image->im.V); image->im.V = NULL;
  }
}

/* Construct the wrapper: open the input, probe the stream type (elementary vs
 * system/pack), detect the real frame rate, parse the first sequence header to
 * learn the coded picture size, and allocate the two reconstruction buffers.
 * The stream is rewound to position 0 when construction finishes. */
VideoWrapper::VideoWrapper(char *inputFile, int streamID, int subStreamID,
                           int subpic_streamID, int subpic_substreamID, int mode)
{
  int code, ret;

  VideoWrapper::streamID           = streamID;
  VideoWrapper::subStreamID        = subStreamID;
  VideoWrapper::subpic_streamID    = subpic_streamID;
  VideoWrapper::subpic_substreamID = subpic_substreamID;
  time = 0;
  error = 0;
  pictureWidth = 0;
  picutreHeight = 0;          /* NOTE: member name is misspelled in the header; kept as-is */
  myVideo = this;
  stopDecoding = false;

  //Do inits
  ld = &base;                 /* select base layer context */
  Frame_Store_Flag = 1;
  Output_Type = 2;
  Output_Picture_Filename = "pic%d";
  System_Stream_Flag = 0;

  //Start demuxer
  SetInput(inputFile);
  //GuessTotalFrames()

  //STARTING DECODER
  InitClip();
  inp->SetStreamPos(0);

  //GUESS STREAM TYPE
  StartReadLPES();
  Initialize_Buffer();
  next_start_code();
  code = Show_Bits(32);
  switch (code)
  {
  case SEQUENCE_HEADER_CODE:
    /* raw video elementary stream: System_Stream_Flag stays 0 */
    break;
  case PACK_START_CODE:
    System_Stream_Flag = 1;
    /* fall through -- both pack and PES-wrapped video are demuxed */
  case VIDEO_ELEMENTARY_STREAM:
    System_Stream_Flag = 1;
    break;
  default:
    sprintf(Error_Text, "Unable to recognize stream type\n");
    Error(Error_Text);
    break;
  }

  detectFrameRate();

  //GET SEQUENCE PROPERTIES
  inp->SetStreamPos(0);
  StartReadLPES();
  Initialize_Buffer();
  Initialize_Decoder();
  NextSequenceHeader();

  /* Headers returns when end of sequence (0) or picture header has been parsed (1) */
  ret = Headers();
  if (ret == 1)
  {
    /*video_sequence(&Bitstream_Framenum);*/
    //VIDEO SEQUENCE....
    Sequence_Framenum = 0;
    Initialize_Sequence();
    pictureWidth = Coded_Picture_Width;
    picutreHeight = Coded_Picture_Height;
    updateVideoStats();
    /* Allocate memory for reconstructed images */
    init_recons(&reca, pictureWidth, picutreHeight);
    init_recons(&recb, pictureWidth, picutreHeight);
    //Deinitialize_Sequence();
  }

  /* rewind so the first Start()/GetFrame() sees the stream from the top */
  inp->SetStreamPos(0);
  StartReadLPES();
  Initialize_Buffer();
  synced = true;
}

/* Destructor: release reconstruction buffers, the clip table and the
 * decoder's reference-frame allocations. */
VideoWrapper::~VideoWrapper()
{
  deinit_recons(&reca);
  deinit_recons(&recb);
  DeInitClip();
  Deinitialize_Sequence();
}

/* Reset all per-run decoding state (timecode, PTS bookkeeping, 3:2 pulldown
 * detector, double buffer, subpics) and prepare for a fresh decode pass.
 * Always returns 0.
 * FIX: explicit int return type (implicit int is not valid C++). */
int VideoWrapper::Start(TVideoOptions *opt)
{
  //Retrieve first sequence_header parameters
  ResetRawPTS();
  time = 0;
  justStarted = true;

  //Read Linear PES init
  StartReadLPES();

  //Decoder Configuration
  timeCode.frame = 0;
  timeCode.hour = 0;
  timeCode.minute = 0;
  timeCode.second = 0;
  stopDecoding = false;
  VideoWrapper::idctType = opt->idctType;
  recons_progressive = opt->recons_progressive;

  //PTS variables
  TFrameInfo temp;
  temp.PTS = 0;
  temp.SCR = 0;
  p.actual   = temp;
  p.backward = temp;
  p.fordward = temp;   /* member name misspelled in the struct; kept as-is */
  p.image    = temp;
  outGopPos = 0;
  internalPTS = 0;
  PTSlocked = false;
  SCRlocked = true;

  // Pull down stuff
  pullDownDetected = false;
  pulldown_state = 3;
  /* field period = half a frame period, in 27 MHz MPEG-2 clock ticks */
  interlaced_field_delay   = (__int64)(MPEG2_CLK_REF*(frameDelay))>>1;
  /* progressive frames shown from a 3:2 source last 1.25 frame periods */
  progressive_frame_delay  = (__int64)(MPEG2_CLK_REF*(1.25*frameDelay));
  seqIsProgressive = false;
  previousPicSCR = 0;
  prevProgressiveFrame = progressive_sequence;
  prevRepeatFirstField = 0;

  // if(synced==false)
  gopNum = -1;
  Bitstream_Framenum = 0;
  Initialize_Decoder();
  sequencePos = SEQ_FIRSTTIME;

  // Double Buffer
  reca.state = FREE;
  recb.state = FREE;
  dbuf.rec[0] = &reca;
  dbuf.rec[1] = &recb;
  image_zero(&reca.im);
  image_zero(&recb.im);

  // Subpic init
  subpic_init();
  return 0;
}

/* Double-buffer helpers: slot x of dbuf.rec holds a reconstruction image and
 * a state (FREE / FULL / TOP_FIELD / BOTTOM_FIELD). */
#define IMAGE_COPY(x)        image_copy(&dbuf.rec[x]->im, &YUV)
#define FIELD_COPY(x,y)      field_copy(&dbuf.rec[x]->im, &YUV, y)
#define FIELD_COPY_SWAP(x,y) field_copy_swap(&dbuf.rec[x]->im, &YUV, y)
#define GET_DB(x)            (&dbuf.rec[x]->im)
#define GET_PTS(x)           (dbuf.rec[x]->PTS)
#define SET_PTS(x,y)         (dbuf.rec[x]->PTS=y)
#define SET_FREE(x)          (dbuf.rec[x]->state = FREE)
#define SET_FULL(x)          (dbuf.rec[x]->state = FULL)
#define DB_0 0
#define DB_1 1

/* Exchange the two reconstruction buffers so slot 0 is always the next image
 * to hand out. */
void VideoWrapper::swap_db()
{
  TRecImage *temp;
  temp = dbuf.rec[0];
  dbuf.rec[0] = dbuf.rec[1];
  dbuf.rec[1] = temp;
}

/* Return the presentation time of the current image and advance the internal
 * clock by one (interlaced) or 1.25 (progressive-reconstruction) frame
 * periods.  adjust_clock is EQUAL, PLUS or MINUS and shifts the returned
 * stamp by half a frame period when the picture starts on a bottom field.
 * NOTE(review): if adjust_clock were ever something other than those three
 * constants while a PTS is present, 'present' would be returned
 * uninitialized -- callers only pass EQUAL/MINUS today. */
i64 inline VideoWrapper::get_time(int adjust_clock)
{
  i64 present;
  i64 delay = (__int64)(MPEG2_CLK_REF*(frameDelay));
  ui32 half_delay = (int)delay>>1;

  if (p.image.PTS) {
    /* re-sync the internal clock to the stream PTS */
    internalPTS = p.image.PTS;
    if (adjust_clock == EQUAL) present = internalPTS;
    if (adjust_clock == PLUS)  present = internalPTS = internalPTS + half_delay;
    if (adjust_clock == MINUS) present = internalPTS = internalPTS - half_delay;
    internalPTS += i64(recons_progressive ? (1.25*(double)delay) : delay);
  }
  else {
    /* free-run from the last known time */
    present = internalPTS;
    internalPTS += i64(recons_progressive ? (1.25*(double)delay) : delay);
  }
  return present;
}

/* State machine that watches progressive_frame / top_field_first /
 * repeat_first_field of incoming pictures and sets pullDownDetected when the
 * classic 3:2 pulldown cadence is recognized.  Always returns 0.
 *
 * pulldown_state: input picture state
 *
 * 3:2 pulldown frames source
 * T: top field  B: bottom field
 *
 *   1st  2nd  3d   4th
 *   TBT  BT   BTB  TB
 *
 * 4 states */
int VideoWrapper::guess_32pulldown()
{
  if (progressive_sequence) {
    /* fully progressive streams carry no pulldown */
    pullDownDetected = false;
    pulldown_state = 3;
  }
  else {
    if (p.image.progressive_frame)
    {
      if (p.image.top_field_first) {
        switch (pulldown_state)
        {
        case 0:
          pullDownDetected = false;
          pulldown_state = 3;
          break;
        case 1:
          pullDownDetected = false;
          pulldown_state = 3;
          break;
        case 2:
          if (!p.image.repeat_first_field) {
            //Yehaa!!
            pulldown_state = 3;
            pullDownDetected = true;
          }
          else {
            pulldown_state = 3;
            pullDownDetected = false;
          }
          break;
        case 3:
          //First in sequence
          if (p.image.repeat_first_field) {
            pulldown_state = 0;
          }
          else {
            pulldown_state = 3;
            pullDownDetected = false;
          }
          break;
        }
      }
      else { //Bottom field first
        switch (pulldown_state)
        {
        case 0:
          if (p.image.repeat_first_field) {
            pulldown_state = 3;
            pullDownDetected = false;
          }
          else
            pulldown_state = 1;
          break;
        case 1:
          if (p.image.repeat_first_field)
            pulldown_state = 2;
          else {
            pulldown_state = 3;
            pullDownDetected = false;
          }
          break;
        case 2:
          pulldown_state = 3;
          pullDownDetected = false;
          break;
        case 3:
          pulldown_state = 3;
          pullDownDetected = false;
          break;
        }
      }
    }
    else {
      /* interlaced-coded picture breaks any cadence in progress */
      pullDownDetected = false;
      pulldown_state = 3;
    }
  }
  return 0;
}

/* Rewrite p.image.PTS so that a detected 3:2 pulldown sequence is timed as
 * its original 24 fps progressive frames.
 *
 * 24 fps sequence
 *
 * [--------][-------][------][------]
 * [ | ][ | ][ | ][ | ][ | ]            */
void VideoWrapper::adjust_pulldown_timing()
{
  if (p.image.PTS) {
    switch (pulldown_state)
    {
    case 0:
      //32 PTS equals real one
      break;
    case 1:
      // PTS for 3 fields of 3:2 sequence
      p.image.PTS = p.image.PTS - 3*interlaced_field_delay + progressive_frame_delay;
      break;
    case 2:
      //3:2 PTS equals the real one
      break;
    case 3:
      p.image.PTS = p.image.PTS - 8*interlaced_field_delay + 3*progressive_frame_delay;
      break;
    }
  }
}

/* Decode/assemble the next displayable frame.
 *   pInfo  - receives the frame's presentation time stamp
 *   frame  - receives a pointer to the reconstructed YUV image (NULL on failure)
 * Returns 1 when a frame was produced, 0 on end of stream / stop.
 * A pending FULL buffer (second frame of a repeat_first_field pair) is
 * returned first without decoding anything new.
 * FIX: explicit int return type; removed the unused local 'frame_delay'. */
int VideoWrapper::GetFrame(presInfo *pInfo, YUVImageTag **frame)
{
  int val;

  *frame = NULL;

  // if there are full frames in the double buffer
  // output them
  if (dbuf.rec[0]->state == FULL) {
    //Return frame 0
    //Swap buffers and signal as FREE
    dbuf.rec[0]->state = FREE;
    *frame = &dbuf.rec[0]->im;
    pInfo->imagePTS = GET_PTS(0);
    swap_db();
    return 1;
  }

  val = get_frame();
  if (!val)
    return 0;

  //if(recons_progressive){
  //  guess_32pulldown();
  //  if(!pullDownDetected)
  //    int i=0;
  //  adjust_pulldown_timing();
  //}
  //else{
  //  pullDownDetected = false;
  //}

  // if partially reconstructed frame
  //	if(dbuf.rec[0]->state != FREE){
  //the picture has either bottom or top field reconstructed
  if (progressive_sequence) {
    //Well, here MPEG2 standard defines
    // to output 1, 2 or 3 consecutive frames
    // for 60 fps progressive output
    // let's output just one
    // This is the case of MPEG1 too
    IMAGE_COPY(0);
    SET_PTS(0, get_time(EQUAL));
    dbuf.rec[0]->state = FREE;
    *frame = GET_DB(0);
    pInfo->imagePTS = GET_PTS(0);
    goto get_frame_end;
  }
  else {
    if (p.image.progressive_frame)
    {
      if (recons_progressive) {
        IMAGE_COPY(0);
        SET_PTS(0, get_time(EQUAL));
        dbuf.rec[0]->state = FREE;
        *frame = GET_DB(0);
        pInfo->imagePTS = GET_PTS(0);
        goto get_frame_end;
      }
      if (p.image.top_field_first) {
        IMAGE_COPY(0);
        SET_PTS(0, get_time(EQUAL));
        dbuf.rec[0]->state = FREE;
        if (p.image.repeat_first_field) {
          /* third field becomes the start of the next output frame */
          FIELD_COPY(1, TOP_FIELD);
          dbuf.rec[1]->state = TOP_FIELD;
        }
        *frame = GET_DB(0);
        pInfo->imagePTS = GET_PTS(0);
        swap_db();
        goto get_frame_end;
      }
      else { //Bottom field first
        FIELD_COPY(0, BOTTOM_FIELD);
        SET_PTS(0, get_time(MINUS));
        SET_FREE(0);
        if (justStarted)
          FIELD_COPY_SWAP(0, BOTTOM_FIELD);   /* no previous top field yet */
        FIELD_COPY(1, TOP_FIELD);
        dbuf.rec[1]->state = TOP_FIELD;
        if (p.image.repeat_first_field) {
          FIELD_COPY(1, BOTTOM_FIELD);
          // Reset PTS for the repeat_first_field if any
          p.image.PTS = 0;
          SET_PTS(1, get_time(EQUAL));
          SET_FULL(1);
        }
        *frame = GET_DB(0);
        pInfo->imagePTS = GET_PTS(0);
        swap_db();
        goto get_frame_end;
      }
    }
    else {
      if (p.image.picture_structure == FRAME_PICTURE) {
        if (p.image.top_field_first) {
          IMAGE_COPY(0);
          SET_PTS(0, get_time(EQUAL));
          SET_FREE(0);
          *frame = GET_DB(0);
          pInfo->imagePTS = GET_PTS(0);
          goto get_frame_end;
        }
        else { //bottom field first
          FIELD_COPY(0, BOTTOM_FIELD);
          SET_PTS(0, get_time(MINUS));
          SET_FREE(0);
          if (justStarted)
            FIELD_COPY_SWAP(0, BOTTOM_FIELD);
          FIELD_COPY(1, TOP_FIELD);
          dbuf.rec[1]->state = TOP_FIELD;
          *frame = GET_DB(0);
          pInfo->imagePTS = GET_PTS(0);
          swap_db();
          goto get_frame_end;
        }
      }
      else {
        if (p.image.picture_structure == TOP_FIELD) {
          IMAGE_COPY(DB_0);
          SET_PTS(0, get_time(EQUAL));
          SET_FREE(0);
          *frame = GET_DB(0);
          pInfo->imagePTS = GET_PTS(0);
          goto get_frame_end;
        }
        else { //field picture. BOTTOM FIELD
          FIELD_COPY(DB_0, BOTTOM_FIELD);
          SET_PTS(0, get_time(MINUS));
          SET_FREE(0);
          if (justStarted)
            FIELD_COPY_SWAP(0, BOTTOM_FIELD);
          FIELD_COPY(DB_1, TOP_FIELD);
          dbuf.rec[1]->state = TOP_FIELD;
          *frame = GET_DB(0);
          pInfo->imagePTS = GET_PTS(0);
          swap_db();
          goto get_frame_end;
        }
      }
    }
  }

get_frame_end:
  justStarted = false;
  return val;
}

/* Drive the decoder state machine one picture forward.
 * Returns 1 when a picture was decoded (or the sequence cleanly finished),
 * 0 on stop request or end of stream (this->error is set accordingly). */
int VideoWrapper::get_frame()
{
  //xsize, ysize: DibArray output sizes
  //If Everything is OK
  int ret;

  //pictureWidth=Coded_Picture_Width;
  //picutreHeight=Coded_Picture_Height;
  updateVideoStats();
  time = (ui32)(double(GetTime())/ (double)27000);  //miliseconds

  //In case there is a problem
  YUV.U = NULL;
  YUV.V = NULL;
  YUV.Y = NULL;
  YUV.Yxsize = -1;

  timeCode.hour   = hour;
  timeCode.minute = minute;
  timeCode.second = sec;
  timeCode.frame  = frame;

  if (stopDecoding) {
    error = PLAYER_STOPPED;
    return 0;
  }

decode_picture_begin:
  switch (sequencePos)
  {
  case SEQ_FIRSTTIME:
    goto start_sequence;  //This is the first time
    break;

  case SEQ_FINISHED:
start_sequence:
    ret = Headers();
    if (ret == 1)
    {
      /*video_sequence(&Bitstream_Framenum);*/
      //VIDEO SEQUENCE....
      Sequence_Framenum = 0;
      /* decode picture whose header has already been parsed in Decode_Bitstream() */
      ret = Decode_Picture(Bitstream_Framenum, Sequence_Framenum);

      /* update picture numbers */
      if (!Second_Field)
      {
        Bitstream_Framenum++;
        Sequence_Framenum++;
      }
      sequencePos = SEQ_STARTED;
      if (ret == 0)
        goto decode_picture_begin;
      // updatePTS(pInfo);
      return 1;
    }
    else
      goto end;  //End of bitstream file
    break;

  case SEQ_STARTED:
    if ((ret = Headers()))
    {
      ret = Decode_Picture(Bitstream_Framenum, Sequence_Framenum);
      //FileProgress.currentPosition=tell(base.Infile);
      if (!Second_Field)
      {
        Bitstream_Framenum++;
        Sequence_Framenum++;
      }
      if (ret == 0)
        goto decode_picture_begin;
      // updatePTS(pInfo);
      return 1;
    }
    else
    {
      /* put last frame */
      if (Sequence_Framenum != 0)
      {
        Output_Last_Frame_of_Sequence(Bitstream_Framenum);
      }
      //Deinitialize_Sequence();
      isDecoding = false;
#ifdef VERIFY
      Clear_Verify_Headers();
#endif /* VERIFY */
      sequencePos = SEQ_FINISHED;
      return 1;
    }
    break;
  }

end:
  error = END_OF_STREAM;
  return 0;
}

/* Stop decoding and reset subpicture state.  Always returns 0.
 * FIX: explicit int return type. */
int VideoWrapper::Stop()
{
  time = 0;
  subpic_free();  /* reset subpics */
  return 0;
}

/* Free the clip table; Clip points 384 bytes into the allocation. */
void DeInitClip()
{
  free(Clip - 384);
}

/* Build the 1024-entry saturation table mapping [-384, 639] -> [0, 255].
 * Clip is biased by +384 so it can be indexed with negative values. */
void InitClip()
{
  int i;

  /* Clip table */
  if (!(Clip = (unsigned char *)malloc(1024)))
    Error("Clip[] malloc failed\n");
  Clip += 384;
  for (i = -384; i < 640; i++)
    Clip[i] = (i < 0) ? 0 : ((i > 255) ? 255 : i);
}

/* IMPLEMENTAION specific rouintes */
/* Reset the GOP counter and select the IDCT implementation chosen in the
 * video options (3 = reference, 2 = fast; anything else = MMX/default,
 * which needs no init here). */
static void Initialize_Decoder()
{
  // gopPreviousPos=-1;
  gop_count = 512;

  /* IDCT */
  if (myVideo->idctType == 3)
    Initialize_Reference_IDCT();
  else if (myVideo->idctType == 2)
    Initialize_Fast_IDCT();
}

/* mostly IMPLEMENTAION specific rouintes */
/* Derive coded picture dimensions from the parsed sequence header and
 * allocate the decoder's reference/auxiliary frame stores. */
static void Initialize_Sequence()
{
  int cc, size;
  static int Table_6_20[3] = {6, 8, 12};

  /* check scalability mode of enhancement layer */
  if (Two_Streams && (enhan.scalable_mode != SC_SNR) && (base.scalable_mode != SC_DP))
    Error("unsupported scalability mode\n");

  /* force MPEG-1 parameters for proper decoder behavior */
  /* see ISO/IEC 13818-2 section D.9.14 */
  if (!base.MPEG2_Flag)
  {
    progressive_sequence = 1;
    progressive_frame = 1;
    picture_structure = FRAME_PICTURE;
    frame_pred_frame_dct = 1;
    chroma_format = CHROMA420;
    matrix_coefficients = 5;
  }

  /* round to nearest multiple of coded macroblocks */
  /* ISO/IEC 13818-2 section 6.3.3 sequence_header() */
  mb_width  = (horizontal_size + 15)/16;
  mb_height = (base.MPEG2_Flag && !progressive_sequence)
                ? 2*((vertical_size + 31)/32)
                : (vertical_size + 15)/16;
  Coded_Picture_Width  = 16*mb_width;
  Coded_Picture_Height = 16*mb_height;

  //Allocate space for Output Bitmap
  //DibArray=malloc( Coded_Picture_Width * Coded_Picture_Height * 3 );
  //DibArray = TempArray;

  /* ISO/IEC 13818-2 sections 6.1.1.8, 6.1.1.9, and 6.1.1.10 */
  Chroma_Width  = (chroma_format == CHROMA444) ? Coded_Picture_Width  : Coded_Picture_Width>>1;
  Chroma_Height = (chroma_format != CHROMA420) ? Coded_Picture_Height : Coded_Picture_Height>>1;

  /* derived based on Table 6-20 in ISO/IEC 13818-2 section 6.3.17 */
  block_count = Table_6_20[chroma_format - 1];

  for (cc = 0; cc < 3; cc++)
  {
    if (cc == 0)
      size = Coded_Picture_Width*Coded_Picture_Height;
    else
      size = Chroma_Width*Chroma_Height;

    if (!(backward_reference_frame[cc] = (unsigned char *)malloc(size)))
      Error("backward_reference_frame[] malloc failed\n");

    if (!(forward_reference_frame[cc] = (unsigned char *)malloc(size)))
      Error("forward_reference_frame[] malloc failed\n");

    if (!(auxframe[cc] = (unsigned char *)malloc(size)))
      Error("auxframe[] malloc failed\n");

    if (Ersatz_Flag)
      if (!(substitute_frame[cc] = (unsigned char *)malloc(size)))
        Error("substitute_frame[] malloc failed\n");

    if (base.scalable_mode == SC_SPAT)
    {
      /* this assumes lower layer is 4:2:0 */
      if (!(llframe0[cc] = (unsigned char *)malloc((lower_layer_prediction_horizontal_size*lower_layer_prediction_vertical_size)/(cc?4:1))))
        Error("llframe0 malloc failed\n");
      if (!(llframe1[cc] = (unsigned char *)malloc((lower_layer_prediction_horizontal_size*lower_layer_prediction_vertical_size)/(cc?4:1))))
        Error("llframe1 malloc failed\n");
    }
  }

  /* SCALABILITY: Spatial */
  if (base.scalable_mode == SC_SPAT)
  {
    if (!(lltmp = (short *)malloc(lower_layer_prediction_horizontal_size*((lower_layer_prediction_vertical_size*vertical_subsampling_factor_n)/vertical_subsampling_factor_m)*sizeof(short))))
      Error("lltmp malloc failed\n");
  }
}

/* Report a decoder error on stderr (decoding continues).
 * FIX: text is printed through "%s" -- passing it as the format string was a
 * format-string bug whenever the message contained '%'. */
void Error(char *text)
{
  fprintf(stderr, "%s", text);
  // exit(1);
}

/* Trace_Flag output */
void Print_Bits(int code, int bits, int len)
{
  int i;
  for (i = 0; i < len; i++)
    printf("%d", (code>>(bits - 1 - i))&1);
}

/* Parse headers on the base (and, if present, enhancement) layer.
 * Returns 0 at end of sequence, 1 once a picture header has been parsed. */
static int Headers()
{
  int ret;

  ld = &base;

  /* return when end of sequence (0) or picture header has been parsed (1) */
  ret = Get_Hdr();
  if (Two_Streams)
  {
    ld = &enhan;
    if (Get_Hdr() != ret && !Quiet_Flag)
      fprintf(stderr, "streams out of sync\n");
    ld = &base;
  }
  return ret;
}

/* Decode the whole bitstream, one video sequence at a time, until Headers()
 * reports end of stream.  (Retained from the reference decoder; the wrapper
 * itself drives decoding through get_frame().) */
static int Decode_Bitstream()
{
  int ret;
  int Bitstream_Framenum;

  Bitstream_Framenum = 0;

  for (;;)
  {
#ifdef VERIFY
    Clear_Verify_Headers();
#endif /* VERIFY */

    ret = Headers();
    if (ret == 1)
    {
      ret = video_sequence(&Bitstream_Framenum);
    }
    else
      return(ret);
  }
}

/* Free everything Initialize_Sequence() allocated and clear the MPEG-2 flag. */
void Deinitialize_Sequence()
{
  int i;

  /* clear flags */
  base.MPEG2_Flag = 0;

  for (i = 0; i < 3; i++)
  {
    free(backward_reference_frame[i]);
    free(forward_reference_frame[i]);
    free(auxframe[i]);
    if (base.scalable_mode == SC_SPAT)
    {
      free(llframe0[i]);
      free(llframe1[i]);
    }
  }

  if (base.scalable_mode == SC_SPAT)
    free(lltmp);

#ifdef DISPLAY
  if (Output_Type == T_X11)
    Terminate_Display_Process();
#endif
}

/* Gutted stub kept for the reference-decoder call graph.
 * FIX: the original returned an uninitialized local (undefined behavior);
 * it now deterministically returns 0 ("end of sequence"). */
static int video_sequence(int *Bitstream_Framenumber)
{
  // int Bitstream_Framenum;
  // int Sequence_Framenum;
  int Return_Value = 0;

  return(Return_Value);
}

/* Seek the input stream to byte position pos and re-arm the PES reader and
 * bit buffer.  Seeking anywhere but 0 clears the 'synced' flag. */
int VideoWrapper::SeekVideo(__int64 pos)
{
  if (pos == 0)
    synced = true;
  else
    synced = false;

  inp->SetStreamPos(pos);
  StartReadLPES();
  Initialize_Buffer();
  return 0;
}

/* Return and clear the last error code (PLAYER_STOPPED / END_OF_STREAM / 0). */
int VideoWrapper::GetError()
{
  int temp;
  temp = error;
  error = 0;
  return temp;
}

/* Refresh the human-readable stream statistics (size, aspect ratio, frame
 * rate, bitrate) from the decoder's current header values.  Always returns 0.
 * FIX: explicit int return type with an actual return statement (the original
 * implicit-int function fell off the end); dropped a duplicated
 * sprintf(sBitrate, ...) that rewrote the identical string. */
int VideoWrapper::updateVideoStats()
{
  sprintf(sVideoSize, "%d x %d pixels", Coded_Picture_Width, Coded_Picture_Height);

  if (isMPEG2) {
    DAR = MPEG2aspect_ratio_Table[aspect_ratio_information];
    strcpy(sAspectRatio, MPEG2aspectStrings[aspect_ratio_information]);
  }
  else {
    /* MPEG-1 stores a sample aspect ratio; convert to display aspect */
    DAR = (MPEG1aspect_ratio_Table[aspect_ratio_information]*(double)picutreHeight)/((double)pictureWidth);
    strcpy(sAspectRatio, MPEG1aspectStrings[aspect_ratio_information]);
  }

  if (progressive_frame)
    strcpy(sProgressive, "Progressive");
  else
    strcpy(sProgressive, "Interlaced");

  /* bit_rate_value is in units of 400 bps (50 bytes * 8) */
  sprintf(sBitrate, "%d bps", bit_rate_value*50*8);

  frameRate = frameRateTable[frame_rate_code];
  strcpy(sFrameRate, MPEGframeRateStrings[frame_rate_code]);
  detectedFrameRateCode = GetFrameRateCode(detectedFrameRate);
  strcpy(sDetectedFPS, MPEGframeRateStrings[detectedFrameRateCode]);
  detectedFrameDelay = 1/frameRateTable[GetFrameRateCode(detectedFrameRate)];
  frameDelay = 1/frameRate;
  return 0;
}

/* Zero the raw PTS ring buffer. */
void VideoWrapper::ResetRawPTS()
{
  int i;
  for (i = 0; i < 1024; i++)
    rawPTS[i] = 0;
}

/* This megamatic function will try to detect
 * the framerate of a stream, just looking into
 * the PTS from the incoming pictures.
 * This is necessary to detect 24 fps DVD progressive sequences
 * that have the frame_rate set to 29.97 and use repeat_first_field flag.
 * Rewinds the stream, measures the PTS delta across a known number of
 * pictures, and stores the result in detectedFrameRate.  Returns 1.
 * FIX: guards the frames==0 case, which previously divided by zero; it now
 * falls back to the frame rate advertised in the sequence header. */
int VideoWrapper::detectFrameRate()
{
  //Guess stream rate if we are handling a MPEG2 file
  i64 fPTS, lPTS;
  int frames = 0;
  int fGOPpos, lGOPpos;
  int ret;
  int gopsParsed;

  inp->SetStreamPos(0);
  StartReadLPES();
  Initialize_Buffer();

  //GopNum is updated everytime a Gop Header is parsed
  //gopFlag flags GOP headers. Must be manually reseted
  if (isMPEG2) {
    //find first frame with a PTS
    do {
      ret = Headers();
    } while ((myPES.PTS == 0 || picture_structure == 2/*BOTTOM_FIELD*/) && ret);
    fPTS = myPES.PTS;
    fGOPpos = temporal_reference;
    frames = 0;

    //find next frame with a PTS greater than fPTS
    gopsParsed = 0;
    do {
      gopFlag = false;
      ret = Headers();
      if (gopFlag) {
        gopsParsed++;
      }
    } while ((myPES.PTS == 0 || picture_structure == 2/*BOTTOM_FIELD*/) && ret && myPES.PTS <= fPTS);
    lPTS = myPES.PTS;
    lGOPpos = temporal_reference;

    /* number of pictures between the two stamped frames, counting across GOPs */
    frames = gopsParsed ? ((gop_count - fGOPpos) + gop_count*(gopsParsed - 1) + lGOPpos)
                        : (lGOPpos - fGOPpos);

    if (frames != 0 && lPTS != fPTS)
      detectedFrameRate = 1/(((double)(lPTS - fPTS)/(double)frames)/(double)MPEG2_CLK_REF);
    else
      detectedFrameRate = frameRateTable[frame_rate_code];  /* avoid divide-by-zero */
  }
  else {
    ret = Headers();
    detectedFrameRate = frameRateTable[frame_rate_code];
  }
  return 1;
}

/* Map an arbitrary fps value to the closest MPEG frame_rate_code (1..8).
 * FIX: frc is initialized so the function cannot return an indeterminate
 * value if no table entry improves on the initial bound. */
int VideoWrapper::GetFrameRateCode(double fr)
{
  double min_error, error;
  int frc, i;

  min_error = 4096;
  frc = 1;
  for (i = 1; i <= 8; i++) {
    error = ABS((frameRateTable[i] - fr));
    if (error < min_error) {
      min_error = error;
      frc = i;
    }
  }
  return frc;
}

/* Current byte position in the input stream. */
i64 VideoWrapper::GetVideoPos()
{
  return inp->GetStreamPos();
}

/* Name of the file being decoded. */
char *VideoWrapper::GetVideoFileName()
{
  return inp->GetFileName();
}

/* Total size of the input stream in bytes. */
i64 VideoWrapper::GetVideoSize()
{
  return inp->GetStreamSize();
}